xentrace: Trace p2m events
author     Keir Fraser <keir.fraser@citrix.com>
           Wed, 3 Feb 2010 09:35:23 +0000 (09:35 +0000)
committer  Keir Fraser <keir.fraser@citrix.com>
           Wed, 3 Feb 2010 09:35:23 +0000 (09:35 +0000)
Add more tracing to aid in debugging ballooning / PoD:
* Nested page faults for EPT/NPT systems
* set_p2m_entry
* Decrease reservation (for ballooning)
* PoD populate, zero reclaim, superpage splinter

Signed-off-by: George Dunlap <george.dunlap@eu.citrix.com>
xen/arch/x86/hvm/svm/svm.c
xen/arch/x86/hvm/vmx/vmx.c
xen/arch/x86/mm/p2m.c
xen/common/memory.c
xen/include/public/trace.h
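
Every hunk below follows the same guarded pattern: test tb_init_done so
that no work is done while tracing is inactive, fill a small on-stack
record, and hand it to __trace_var().  A minimal sketch of that pattern
as it would appear inside a hypervisor function (TRC_EXAMPLE_EVENT and
the record fields are placeholders, not part of the patch):

    /* Sketch of the emit pattern this patch repeats.  tb_init_done and
     * __trace_var() come from xen/trace.h; the event id and record
     * layout here are illustrative only. */
    if ( tb_init_done )
    {
        struct {
            u64 gfn;
            int d:16, order:16;
        } t;

        t.gfn = gfn;
        t.d = d->domain_id;
        t.order = order;

        /* args: event id, cycles flag (0 here, i.e. no timestamp),
         * payload size in bytes, payload pointer */
        __trace_var(TRC_EXAMPLE_EVENT, 0, sizeof(t), (unsigned char *)&t);
    }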

index 8b12d515f97f75d0ad50eca5b663b03feea5d341..8ef1bdb46399f46e8bf2626a65412f893b74c5ee 100644 (file)
@@ -893,6 +893,22 @@ static void svm_do_nested_pgfault(paddr_t gpa)
     mfn_t mfn;
     p2m_type_t p2mt;
 
+    if ( tb_init_done )
+    {
+        struct {
+            uint64_t gpa;
+            uint64_t mfn;
+            u32 qualification;
+            u32 p2mt;
+        } _d;
+
+        _d.gpa = gpa;
+        _d.qualification = 0;
+        _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
+
+        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
+    }
+
     if ( hvm_hap_nested_page_fault(gfn) )
         return;
 
index 8cf971a45c2f6928ce75ab217b8c842b1cbbbb4e..66c889bfefd852d1badff05d314f115dd97ec38c 100644 (file)
@@ -2100,6 +2100,22 @@ static void ept_handle_violation(unsigned long qualification, paddr_t gpa)
     mfn_t mfn;
     p2m_type_t p2mt;
 
+    if ( tb_init_done )
+    {
+        struct {
+            uint64_t gpa;
+            uint64_t mfn;
+            u32 qualification;
+            u32 p2mt;
+        } _d;
+
+        _d.gpa = gpa;
+        _d.qualification = qualification;
+        _d.mfn = mfn_x(gfn_to_mfn_query(current->domain, gfn, &_d.p2mt));
+
+        __trace_var(TRC_HVM_NPF, 0, sizeof(_d), (unsigned char *)&_d);
+    }
+
     if ( (qualification & EPT_GLA_VALID) &&
          hvm_hap_nested_page_fault(gfn) )
         return;
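
The SVM and EPT hunks above emit the same 24-byte TRC_HVM_NPF record;
the only difference is that SVM nested page faults carry no EPT-style
exit qualification, so that field is logged as 0.  A trace consumer
might mirror the payload like this (struct name and field comments are
mine, not from the patch; field order and widths must match the
emitter exactly):

    #include <stdint.h>

    /* Hypothetical consumer-side view of the TRC_HVM_NPF payload. */
    struct npf_record {
        uint64_t gpa;           /* faulting guest-physical address         */
        uint64_t mfn;           /* machine frame backing the gfn, if any   */
        uint32_t qualification; /* EPT exit qualification; always 0 on SVM */
        uint32_t p2mt;          /* p2m type from gfn_to_mfn_query()        */
    };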
index e57ce18d6d2e415bc5c3650ec1c5e61fdfae8bdf..5c2e12dd3737edb48407ea55e9bbf147ebfa889d 100644 (file)
@@ -829,6 +829,21 @@ p2m_pod_zero_check_superpage(struct domain *d, unsigned long gfn)
             goto out_reset;
     }
 
+    if ( tb_init_done )
+    {
+        struct {
+            u64 gfn, mfn;
+            int d:16, order:16;
+        } t;
+
+        t.gfn = gfn;
+        t.mfn = mfn_x(mfn);
+        t.d = d->domain_id;
+        t.order = 9;
+
+        __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t);
+    }
+
     /* Finally!  We've passed all the checks, and can add the mfn superpage
      * back on the PoD cache, and account for the new p2m PoD entries */
     p2m_pod_cache_add(d, mfn_to_page(mfn0), 9);
@@ -928,6 +943,21 @@ p2m_pod_zero_check(struct domain *d, unsigned long *gfns, int count)
         }
         else
         {
+            if ( tb_init_done )
+            {
+                struct {
+                    u64 gfn, mfn;
+                    int d:16, order:16;
+                } t;
+
+                t.gfn = gfns[i];
+                t.mfn = mfn_x(mfns[i]);
+                t.d = d->domain_id;
+                t.order = 0;
+
+                __trace_var(TRC_MEM_POD_ZERO_RECLAIM, 0, sizeof(t), (unsigned char *)&t);
+            }
+
             /* Add to cache, and account for the new p2m PoD entry */
             p2m_pod_cache_add(d, mfn_to_page(mfns[i]), 0);
             d->arch.p2m->pod.entry_count++;
@@ -1073,6 +1103,21 @@ p2m_pod_demand_populate(struct domain *d, unsigned long gfn,
     p2md->pod.entry_count -= (1 << order); /* Lock: p2m */
     BUG_ON(p2md->pod.entry_count < 0);
 
+    if ( tb_init_done )
+    {
+        struct {
+            u64 gfn, mfn;
+            int d:16,order:16;
+        } t;
+
+        t.gfn = gfn;
+        t.mfn = mfn_x(mfn);
+        t.d = d->domain_id;
+        t.order = order;
+
+        __trace_var(TRC_MEM_POD_POPULATE, 0, sizeof(t), (unsigned char *)&t);
+    }
+
     return 0;
 out_of_memory:
     spin_unlock(&d->page_alloc_lock);
@@ -1091,6 +1136,18 @@ remap_and_retry:
     for(i=0; i<(1<<order); i++)
         set_p2m_entry(d, gfn_aligned+i, _mfn(POPULATE_ON_DEMAND_MFN), 0,
                       p2m_populate_on_demand);
+    if ( tb_init_done )
+    {
+        struct {
+            u64 gfn;
+            int d:16;
+        } t;
+
+        t.gfn = gfn;
+        t.d = d->domain_id;
+
+        __trace_var(TRC_MEM_POD_SUPERPAGE_SPLINTER, 0, sizeof(t), (unsigned char *)&t);
+    }
 
     return 0;
 }
@@ -1141,6 +1198,23 @@ p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
     l2_pgentry_t l2e_content;
     int rv=0;
 
+    if ( tb_init_done )
+    {
+        struct {
+            u64 gfn, mfn;
+            int p2mt;
+            int d:16, order:16;
+        } t;
+
+        t.gfn = gfn;
+        t.mfn = mfn_x(mfn);
+        t.p2mt = p2mt;
+        t.d = d->domain_id;
+        t.order = page_order;
+
+        __trace_var(TRC_MEM_SET_P2M_ENTRY, 0, sizeof(t), (unsigned char *)&t);
+    }
+
 #if CONFIG_PAGING_LEVELS >= 4
     if ( !p2m_next_level(d, &table_mfn, &table, &gfn_remainder, gfn,
                          L4_PAGETABLE_SHIFT - PAGE_SHIFT,
@@ -1225,7 +1299,7 @@ p2m_set_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
     /* Success */
     rv = 1;
 
- out:
+out:
     unmap_domain_page(table);
     return rv;
 }
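
The p2m.c records pack the domain id and page order into a single
32-bit word as two signed 16-bit bitfields.  The superpage reclaim path
hard-codes order 9 (one 2MiB superpage), the single-page path uses
order 0, and TRC_MEM_SET_P2M_ENTRY carries an extra p2mt word.  A
decoder might mirror the common PoD layout as below (hypothetical
names; this assumes the emitter's bitfields lay out like two
little-endian 16-bit integers, as they do on x86):

    #include <stdint.h>

    /* Hypothetical mirror of the TRC_MEM_POD_POPULATE and
     * TRC_MEM_POD_ZERO_RECLAIM payloads emitted above. */
    struct pod_record {
        uint64_t gfn;
        uint64_t mfn;
        int16_t  d;      /* domain id */
        int16_t  order;  /* 0 = 4KiB page, 9 = 2MiB superpage */
    };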
index 329483756a1275c723772390227bc5946ce4342e..b1db5f58885aede5a32be8359d2676fa667e9fe4 100644 (file)
@@ -28,6 +28,7 @@
 #include <xen/numa.h>
 #include <public/memory.h>
 #include <xsm/xsm.h>
+#include <xen/trace.h>
 
 struct memop_args {
     /* INPUT */
@@ -222,6 +223,20 @@ static void decrease_reservation(struct memop_args *a)
         if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
             goto out;
 
+        if ( tb_init_done )
+        {
+            struct {
+                u64 gfn;
+                int d:16, order:16;
+            } t;
+
+            t.gfn = gmfn;
+            t.d = a->domain->domain_id;
+            t.order = a->extent_order;
+
+            __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), (unsigned char *)&t);
+        }
+
         /* See if populate-on-demand wants to handle this */
         if ( is_hvm_domain(a->domain)
              && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
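
Note the placement in decrease_reservation(): the record is emitted
once per extent, before populate-on-demand is offered the range, so
ballooning shows up in the trace even when PoD absorbs the extent
without touching the host p2m.  The payload is a trimmed version of the
PoD record, with no mfn (hypothetical mirror, as before):

    #include <stdint.h>

    /* Hypothetical mirror of the TRC_MEM_DECREASE_RESERVATION payload. */
    struct decrease_record {
        uint64_t gfn;    /* first gfn of the extent being released */
        int16_t  d;      /* domain shrinking its reservation        */
        int16_t  order;  /* extent order from the hypercall         */
    };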
index b6f112e201b62feb27cc7d04adc1f303f257d57c..9385cb75ffd9e2346ceeb996ee6b6901fffaae74 100644 (file)
 #define TRC_MEM_PAGE_GRANT_MAP      (TRC_MEM + 1)
 #define TRC_MEM_PAGE_GRANT_UNMAP    (TRC_MEM + 2)
 #define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
+#define TRC_MEM_SET_P2M_ENTRY       (TRC_MEM + 4)
+#define TRC_MEM_DECREASE_RESERVATION (TRC_MEM + 5)
+#define TRC_MEM_POD_POPULATE        (TRC_MEM + 16)
+#define TRC_MEM_POD_ZERO_RECLAIM    (TRC_MEM + 17)
+#define TRC_MEM_POD_SUPERPAGE_SPLINTER (TRC_MEM + 18)
+
 
 #define TRC_PV_HYPERCALL             (TRC_PV +  1)
 #define TRC_PV_TRAP                  (TRC_PV +  3)
 #define TRC_HVM_LMSW            (TRC_HVM_HANDLER + 0x19)
 #define TRC_HVM_LMSW64          (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
 #define TRC_HVM_INTR_WINDOW     (TRC_HVM_HANDLER + 0x20)
+#define TRC_HVM_NPF             (TRC_HVM_HANDLER + 0x21)
+
 #define TRC_HVM_IOPORT_WRITE    (TRC_HVM_HANDLER + 0x216)
 #define TRC_HVM_IOMEM_WRITE     (TRC_HVM_HANDLER + 0x217)
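
With the ids above, a post-processing tool can attach names to the new
records.  A hypothetical lookup table (not part of this patch):

    #include <stdint.h>

    /* Hypothetical id-to-name table for the events this patch adds,
     * e.g. for use in a xentrace post-processor. */
    static const struct {
        uint32_t    id;
        const char *name;
    } p2m_trace_names[] = {
        { TRC_MEM_SET_P2M_ENTRY,          "set_p2m_entry"          },
        { TRC_MEM_DECREASE_RESERVATION,   "decrease_reservation"   },
        { TRC_MEM_POD_POPULATE,           "pod_populate"           },
        { TRC_MEM_POD_ZERO_RECLAIM,       "pod_zero_reclaim"       },
        { TRC_MEM_POD_SUPERPAGE_SPLINTER, "pod_superpage_splinter" },
        { TRC_HVM_NPF,                    "hvm_nested_page_fault"  },
    };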